bitkeeper revision 1.1682 (42a42b05Cvw3LyFcUHH4i1_9HWbLBA)
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Mon, 6 Jun 2005 10:52:53 +0000 (10:52 +0000)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Mon, 6 Jun 2005 10:52:53 +0000 (10:52 +0000)
Merge x86/32 and x86/64 usercopy routines.
Signed-off-by: Keir Fraser <keir@xensource.com>
.rootkeys
xen/arch/x86/usercopy.c [new file with mode: 0644]
xen/arch/x86/x86_32/usercopy.c [deleted file]
xen/arch/x86/x86_64/usercopy.c [deleted file]
xen/include/asm-x86/types.h
xen/include/asm-x86/uaccess.h
xen/include/asm-x86/x86_32/uaccess.h
xen/include/asm-x86/x86_64/uaccess.h

index bd90121badd301b597e5210745f4185eef4f8e08..52f533e1f1c65067346b1cbd188c2b1413ee4d7a 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3ddb79bc-Udq7ol-NX4q9XsYnN7A2Q xen/arch/x86/time.c
 3ddb79bccYVzXZJyVaxuv5T42Z1Fsw xen/arch/x86/trampoline.S
 3ddb79bcOftONV9h4QCxXOfiT0h91w xen/arch/x86/traps.c
+40e96d3ahBTZqbTViInnq0lM03vs7A xen/arch/x86/usercopy.c
 41c0c411tD3C7TpfDMiFTf7BaNd_Dg xen/arch/x86/vmx.c
 420951dcf1rSGnCH0AEYN2KjWGLG6A xen/arch/x86/vmx_intercept.c
 41c0c411ODt8uEmV-yUxpQLpqimE5Q xen/arch/x86/vmx_io.c
 3ddb79bcHwuCQDjBICDTSis52hWguw xen/arch/x86/x86_32/mm.c
 40f92331jfOlE7MfKwpdkEb1CEf23g xen/arch/x86/x86_32/seg_fixup.c
 42000d3ckiFc1qxa4AWqsd0t3lxuyw xen/arch/x86/x86_32/traps.c
-3ddb79bc4nTpGQOe6_-MbyZzkhlhFQ xen/arch/x86/x86_32/usercopy.c
 3ddb79bcOMCu9-5mKpjIh5d0qqBDPg xen/arch/x86/x86_32/xen.lds
 41bf1717Ty3hwN3E9swdu8QfnvGqww xen/arch/x86/x86_64/asm-offsets.c
 40e96d3aLDI-nViMuYneD7VKYlZrVg xen/arch/x86/x86_64/entry.S
 41bf1717XhPz_dNT5OKSjgmbFuWBuA xen/arch/x86/x86_64/mm.c
 42000d3cMb8o1WuFBXC07c8i3lPZBw xen/arch/x86/x86_64/traps.c
-40e96d3ahBTZqbTViInnq0lM03vs7A xen/arch/x86/x86_64/usercopy.c
 40e96d3akN3Hu_J5Bk-WXD8OGscrYQ xen/arch/x86/x86_64/xen.lds
 422f27c8J9DQfCpegccMid59XhSmGA xen/arch/x86/x86_emulate.c
 3ddb79bdff-gj-jFGKjOejeHLqL8Lg xen/common/Makefile
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
new file mode 100644 (file)
index 0000000..f16c4da
--- /dev/null
@@ -0,0 +1,139 @@
+/* 
+ * User address space access functions.
+ *
+ * Copyright 1997 Andi Kleen <ak@muc.de>
+ * Copyright 1997 Linus Torvalds
+ * Copyright 2002 Andi Kleen <ak@suse.de>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <asm/uaccess.h>
+
+unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
+{
+       unsigned long __d0, __d1, __d2, __n = n;
+       __asm__ __volatile__(
+               "       cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
+               "       jbe  1f\n"
+               "       mov  %1,%0\n"
+               "       neg  %0\n"
+               "       and  $"STR(BYTES_PER_LONG-1)",%0\n"
+               "       sub  %0,%3\n"
+               "4:     rep; movsb\n" /* make 'to' address aligned */
+               "       mov  %3,%0\n"
+               "       shr  $"STR(LONG_BYTEORDER)",%0\n"
+               "       and  $"STR(BYTES_PER_LONG-1)",%3\n"
+               "       .align 2,0x90\n"
+               "0:     rep; movs"__OS"\n" /* as many words as possible... */
+               "       mov  %3,%0\n"
+               "1:     rep; movsb\n" /* ...remainder copied as bytes */
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "5:     add %3,%0\n"
+               "       jmp 2b\n"
+               "3:     lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
+               "       jmp 2b\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               "       "__FIXUP_ALIGN"\n"
+               "       "__FIXUP_WORD" 4b,5b\n"
+               "       "__FIXUP_WORD" 0b,3b\n"
+               "       "__FIXUP_WORD" 1b,2b\n"
+               ".previous"
+               : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+               : "3"(__n), "0"(__n), "1"(to), "2"(from)
+               : "memory");
+       return (unsigned)__n;
+}
+
+unsigned long
+__copy_from_user_ll(void *to, const void __user *from, unsigned n)
+{
+       unsigned long __d0, __d1, __d2, __n = n;
+       __asm__ __volatile__(
+               "       cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
+               "       jbe  1f\n"
+               "       mov  %1,%0\n"
+               "       neg  %0\n"
+               "       and  $"STR(BYTES_PER_LONG-1)",%0\n"
+               "       sub  %0,%3\n"
+               "4:     rep; movsb\n" /* make 'to' address aligned */
+               "       mov  %3,%0\n"
+               "       shr  $"STR(LONG_BYTEORDER)",%0\n"
+               "       and  $"STR(BYTES_PER_LONG-1)",%3\n"
+               "       .align 2,0x90\n"
+               "0:     rep; movs"__OS"\n" /* as many words as possible... */
+               "       mov  %3,%0\n"
+               "1:     rep; movsb\n" /* ...remainder copied as bytes */
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "5:     add %3,%0\n"
+               "       jmp 6f\n"
+               "3:     lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
+               "6:     push %0\n"
+               "       push %%"__OP"ax\n"
+               "       xor  %%eax,%%eax\n"
+               "       rep; stosb\n"
+               "       pop  %%"__OP"ax\n"
+               "       pop  %0\n"
+               "       jmp 2b\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               "       "__FIXUP_ALIGN"\n"
+               "       "__FIXUP_WORD" 4b,5b\n"
+               "       "__FIXUP_WORD" 0b,3b\n"
+               "       "__FIXUP_WORD" 1b,6b\n"
+               ".previous"
+               : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+               : "3"(__n), "0"(__n), "1"(to), "2"(from)
+               : "memory");
+       return (unsigned)__n;
+}
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long
+copy_to_user(void __user *to, const void *from, unsigned n)
+{
+       if (access_ok(to, n))
+               n = __copy_to_user(to, from, n);
+       return n;
+}
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long
+copy_from_user(void *to, const void __user *from, unsigned n)
+{
+       if (access_ok(from, n))
+               n = __copy_from_user(to, from, n);
+       else
+               memset(to, 0, n);
+       return n;
+}
diff --git a/xen/arch/x86/x86_32/usercopy.c b/xen/arch/x86/x86_32/usercopy.c
deleted file mode 100644 (file)
index c05ffd4..0000000
+++ /dev/null
@@ -1,443 +0,0 @@
-/* 
- * User address space access functions.
- * The non inlined parts of asm-i386/uaccess.h are here.
- *
- * Copyright 1997 Andi Kleen <ak@muc.de>
- * Copyright 1997 Linus Torvalds
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <asm/uaccess.h>
-
-static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
-{
-#ifdef CONFIG_X86_INTEL_USERCOPY
-       if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
-               return 0;
-#endif
-       return 1;
-}
-#define movsl_is_ok(a1,a2,n) \
-       __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
-
-
-/*
- * Zero Userspace
- */
-
-#define __do_clear_user(addr,size)                                     \
-do {                                                                   \
-       int __d0;                                                       \
-       __asm__ __volatile__(                                           \
-               "0:     rep; stosl\n"                                   \
-               "       movl %2,%0\n"                                   \
-               "1:     rep; stosb\n"                                   \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "3:     lea 0(%2,%0,4),%0\n"                            \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 4\n"                                     \
-               "       .long 0b,3b\n"                                  \
-               "       .long 1b,2b\n"                                  \
-               ".previous"                                             \
-               : "=&c"(size), "=&D" (__d0)                             \
-               : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));     \
-} while (0)
-
-/**
- * clear_user: - Zero a block of memory in user space.
- * @to:   Destination address, in user space.
- * @n:    Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-unsigned long
-clear_user(void __user *to, unsigned long n)
-{
-       if (access_ok(to, n))
-               __do_clear_user(to, n);
-       return n;
-}
-
-/**
- * __clear_user: - Zero a block of memory in user space, with less checking.
- * @to:   Destination address, in user space.
- * @n:    Number of bytes to zero.
- *
- * Zero a block of memory in user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-unsigned long
-__clear_user(void __user *to, unsigned long n)
-{
-       __do_clear_user(to, n);
-       return n;
-}
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
-static unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
-{
-       int d0, d1;
-       __asm__ __volatile__(
-                      "       .align 2,0x90\n"
-                      "1:     movl 32(%4), %%eax\n"
-                      "       cmpl $67, %0\n"
-                      "       jbe 3f\n"
-                      "2:     movl 64(%4), %%eax\n"
-                      "       .align 2,0x90\n"
-                      "3:     movl 0(%4), %%eax\n"
-                      "4:     movl 4(%4), %%edx\n"
-                      "5:     movl %%eax, 0(%3)\n"
-                      "6:     movl %%edx, 4(%3)\n"
-                      "7:     movl 8(%4), %%eax\n"
-                      "8:     movl 12(%4),%%edx\n"
-                      "9:     movl %%eax, 8(%3)\n"
-                      "10:    movl %%edx, 12(%3)\n"
-                      "11:    movl 16(%4), %%eax\n"
-                      "12:    movl 20(%4), %%edx\n"
-                      "13:    movl %%eax, 16(%3)\n"
-                      "14:    movl %%edx, 20(%3)\n"
-                      "15:    movl 24(%4), %%eax\n"
-                      "16:    movl 28(%4), %%edx\n"
-                      "17:    movl %%eax, 24(%3)\n"
-                      "18:    movl %%edx, 28(%3)\n"
-                      "19:    movl 32(%4), %%eax\n"
-                      "20:    movl 36(%4), %%edx\n"
-                      "21:    movl %%eax, 32(%3)\n"
-                      "22:    movl %%edx, 36(%3)\n"
-                      "23:    movl 40(%4), %%eax\n"
-                      "24:    movl 44(%4), %%edx\n"
-                      "25:    movl %%eax, 40(%3)\n"
-                      "26:    movl %%edx, 44(%3)\n"
-                      "27:    movl 48(%4), %%eax\n"
-                      "28:    movl 52(%4), %%edx\n"
-                      "29:    movl %%eax, 48(%3)\n"
-                      "30:    movl %%edx, 52(%3)\n"
-                      "31:    movl 56(%4), %%eax\n"
-                      "32:    movl 60(%4), %%edx\n"
-                      "33:    movl %%eax, 56(%3)\n"
-                      "34:    movl %%edx, 60(%3)\n"
-                      "       addl $-64, %0\n"
-                      "       addl $64, %4\n"
-                      "       addl $64, %3\n"
-                      "       cmpl $63, %0\n"
-                      "       ja  1b\n"
-                      "35:    movl  %0, %%eax\n"
-                      "       shrl  $2, %0\n"
-                      "       andl  $3, %%eax\n"
-                      "       cld\n"
-                      "99:    rep; movsl\n"
-                      "36:    movl %%eax, %0\n"
-                      "37:    rep; movsb\n"
-                      "100:\n"
-                      ".section .fixup,\"ax\"\n"
-                      "101:   lea 0(%%eax,%0,4),%0\n"
-                      "       jmp 100b\n"
-                      ".previous\n"
-                      ".section __ex_table,\"a\"\n"
-                      "       .align 4\n"
-                      "       .long 1b,100b\n"
-                      "       .long 2b,100b\n"
-                      "       .long 3b,100b\n"
-                      "       .long 4b,100b\n"
-                      "       .long 5b,100b\n"
-                      "       .long 6b,100b\n"
-                      "       .long 7b,100b\n"
-                      "       .long 8b,100b\n"
-                      "       .long 9b,100b\n"
-                      "       .long 10b,100b\n"
-                      "       .long 11b,100b\n"
-                      "       .long 12b,100b\n"
-                      "       .long 13b,100b\n"
-                      "       .long 14b,100b\n"
-                      "       .long 15b,100b\n"
-                      "       .long 16b,100b\n"
-                      "       .long 17b,100b\n"
-                      "       .long 18b,100b\n"
-                      "       .long 19b,100b\n"
-                      "       .long 20b,100b\n"
-                      "       .long 21b,100b\n"
-                      "       .long 22b,100b\n"
-                      "       .long 23b,100b\n"
-                      "       .long 24b,100b\n"
-                      "       .long 25b,100b\n"
-                      "       .long 26b,100b\n"
-                      "       .long 27b,100b\n"
-                      "       .long 28b,100b\n"
-                      "       .long 29b,100b\n"
-                      "       .long 30b,100b\n"
-                      "       .long 31b,100b\n"
-                      "       .long 32b,100b\n"
-                      "       .long 33b,100b\n"
-                      "       .long 34b,100b\n"
-                      "       .long 35b,100b\n"
-                      "       .long 36b,100b\n"
-                      "       .long 37b,100b\n"
-                      "       .long 99b,101b\n"
-                      ".previous"
-                      : "=&c"(size), "=&D" (d0), "=&S" (d1)
-                      :  "1"(to), "2"(from), "0"(size)
-                      : "eax", "edx", "memory");
-       return size;
-}
-
-static unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
-{
-       int d0, d1;
-       __asm__ __volatile__(
-                      "        .align 2,0x90\n"
-                      "0:      movl 32(%4), %%eax\n"
-                      "        cmpl $67, %0\n"      
-                      "        jbe 2f\n"            
-                      "1:      movl 64(%4), %%eax\n"
-                      "        .align 2,0x90\n"     
-                      "2:      movl 0(%4), %%eax\n" 
-                      "21:     movl 4(%4), %%edx\n" 
-                      "        movl %%eax, 0(%3)\n" 
-                      "        movl %%edx, 4(%3)\n" 
-                      "3:      movl 8(%4), %%eax\n" 
-                      "31:     movl 12(%4),%%edx\n" 
-                      "        movl %%eax, 8(%3)\n" 
-                      "        movl %%edx, 12(%3)\n"
-                      "4:      movl 16(%4), %%eax\n"
-                      "41:     movl 20(%4), %%edx\n"
-                      "        movl %%eax, 16(%3)\n"
-                      "        movl %%edx, 20(%3)\n"
-                      "10:     movl 24(%4), %%eax\n"
-                      "51:     movl 28(%4), %%edx\n"
-                      "        movl %%eax, 24(%3)\n"
-                      "        movl %%edx, 28(%3)\n"
-                      "11:     movl 32(%4), %%eax\n"
-                      "61:     movl 36(%4), %%edx\n"
-                      "        movl %%eax, 32(%3)\n"
-                      "        movl %%edx, 36(%3)\n"
-                      "12:     movl 40(%4), %%eax\n"
-                      "71:     movl 44(%4), %%edx\n"
-                      "        movl %%eax, 40(%3)\n"
-                      "        movl %%edx, 44(%3)\n"
-                      "13:     movl 48(%4), %%eax\n"
-                      "81:     movl 52(%4), %%edx\n"
-                      "        movl %%eax, 48(%3)\n"
-                      "        movl %%edx, 52(%3)\n"
-                      "14:     movl 56(%4), %%eax\n"
-                      "91:     movl 60(%4), %%edx\n"
-                      "        movl %%eax, 56(%3)\n"
-                      "        movl %%edx, 60(%3)\n"
-                      "        addl $-64, %0\n"     
-                      "        addl $64, %4\n"      
-                      "        addl $64, %3\n"      
-                      "        cmpl $63, %0\n"      
-                      "        ja  0b\n"            
-                      "5:      movl  %0, %%eax\n"   
-                      "        shrl  $2, %0\n"      
-                      "        andl $3, %%eax\n"    
-                      "        cld\n"               
-                      "6:      rep; movsl\n"   
-                      "        movl %%eax,%0\n"
-                      "7:      rep; movsb\n"   
-                      "8:\n"                   
-                      ".section .fixup,\"ax\"\n"
-                      "9:      lea 0(%%eax,%0,4),%0\n" 
-                      "16:     pushl %0\n"     
-                      "        pushl %%eax\n"  
-                      "        xorl %%eax,%%eax\n"
-                      "        rep; stosb\n"   
-                      "        popl %%eax\n"   
-                      "        popl %0\n"      
-                      "        jmp 8b\n"       
-                      ".previous\n"            
-                      ".section __ex_table,\"a\"\n"
-                      "        .align 4\n"        
-                      "        .long 0b,16b\n"  
-                      "        .long 1b,16b\n"
-                      "        .long 2b,16b\n"
-                      "        .long 21b,16b\n"
-                      "        .long 3b,16b\n" 
-                      "        .long 31b,16b\n"
-                      "        .long 4b,16b\n" 
-                      "        .long 41b,16b\n"
-                      "        .long 10b,16b\n"
-                      "        .long 51b,16b\n"
-                      "        .long 11b,16b\n"
-                      "        .long 61b,16b\n"
-                      "        .long 12b,16b\n"
-                      "        .long 71b,16b\n"
-                      "        .long 13b,16b\n"
-                      "        .long 81b,16b\n"
-                      "        .long 14b,16b\n"
-                      "        .long 91b,16b\n"
-                      "        .long 6b,9b\n"  
-                      "        .long 7b,16b\n" 
-                      ".previous"              
-                      : "=&c"(size), "=&D" (d0), "=&S" (d1)
-                      :  "1"(to), "2"(from), "0"(size)
-                      : "eax", "edx", "memory");
-       return size;
-}
-#else
-/*
- * Leave these declared but undefined.  They should not be any references to
- * them
- */
-unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
-unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size);
-#endif /* CONFIG_X86_INTEL_USERCOPY */
-
-/* Generic arbitrary sized copy.  */
-#define __copy_user(to,from,size)                                      \
-do {                                                                   \
-       int __d0, __d1, __d2;                                           \
-       __asm__ __volatile__(                                           \
-               "       cmp  $7,%0\n"                                   \
-               "       jbe  1f\n"                                      \
-               "       movl %1,%0\n"                                   \
-               "       negl %0\n"                                      \
-               "       andl $7,%0\n"                                   \
-               "       subl %0,%3\n"                                   \
-               "4:     rep; movsb\n"                                   \
-               "       movl %3,%0\n"                                   \
-               "       shrl $2,%0\n"                                   \
-               "       andl $3,%3\n"                                   \
-               "       .align 2,0x90\n"                                \
-               "0:     rep; movsl\n"                                   \
-               "       movl %3,%0\n"                                   \
-               "1:     rep; movsb\n"                                   \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "5:     addl %3,%0\n"                                   \
-               "       jmp 2b\n"                                       \
-               "3:     lea 0(%3,%0,4),%0\n"                            \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 4\n"                                     \
-               "       .long 4b,5b\n"                                  \
-               "       .long 0b,3b\n"                                  \
-               "       .long 1b,2b\n"                                  \
-               ".previous"                                             \
-               : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)   \
-               : "3"(size), "0"(size), "1"(to), "2"(from)              \
-               : "memory");                                            \
-} while (0)
-
-#define __copy_user_zeroing(to,from,size)                              \
-do {                                                                   \
-       int __d0, __d1, __d2;                                           \
-       __asm__ __volatile__(                                           \
-               "       cmp  $7,%0\n"                                   \
-               "       jbe  1f\n"                                      \
-               "       movl %1,%0\n"                                   \
-               "       negl %0\n"                                      \
-               "       andl $7,%0\n"                                   \
-               "       subl %0,%3\n"                                   \
-               "4:     rep; movsb\n"                                   \
-               "       movl %3,%0\n"                                   \
-               "       shrl $2,%0\n"                                   \
-               "       andl $3,%3\n"                                   \
-               "       .align 2,0x90\n"                                \
-               "0:     rep; movsl\n"                                   \
-               "       movl %3,%0\n"                                   \
-               "1:     rep; movsb\n"                                   \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "5:     addl %3,%0\n"                                   \
-               "       jmp 6f\n"                                       \
-               "3:     lea 0(%3,%0,4),%0\n"                            \
-               "6:     pushl %0\n"                                     \
-               "       pushl %%eax\n"                                  \
-               "       xorl %%eax,%%eax\n"                             \
-               "       rep; stosb\n"                                   \
-               "       popl %%eax\n"                                   \
-               "       popl %0\n"                                      \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 4\n"                                     \
-               "       .long 4b,5b\n"                                  \
-               "       .long 0b,3b\n"                                  \
-               "       .long 1b,6b\n"                                  \
-               ".previous"                                             \
-               : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)   \
-               : "3"(size), "0"(size), "1"(to), "2"(from)              \
-               : "memory");                                            \
-} while (0)
-
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
-{
-       if (movsl_is_ok(to, from, n))
-               __copy_user(to, from, n);
-       else
-               n = __copy_user_intel(to, from, n);
-       return n;
-}
-
-unsigned long
-__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
-{
-       if (movsl_is_ok(to, from, n))
-               __copy_user_zeroing(to, from, n);
-       else
-               n = __copy_user_zeroing_intel(to, from, n);
-       return n;
-}
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       if (access_ok(to, n))
-               n = __copy_to_user(to, from, n);
-       return n;
-}
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       if (access_ok(from, n))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
-}
diff --git a/xen/arch/x86/x86_64/usercopy.c b/xen/arch/x86/x86_64/usercopy.c
deleted file mode 100644 (file)
index 685cd7b..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-/* 
- * User address space access functions.
- *
- * Copyright 1997 Andi Kleen <ak@muc.de>
- * Copyright 1997 Linus Torvalds
- * Copyright 2002 Andi Kleen <ak@suse.de>
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <asm/uaccess.h>
-
-/*
- * Zero Userspace
- */
-
-unsigned long __clear_user(void *addr, unsigned long size)
-{
-       long __d0;
-       /* no memory constraint because it doesn't change any memory gcc knows
-          about */
-       asm volatile(
-               "       testq  %[size8],%[size8]\n"
-               "       jz     4f\n"
-               "0:     movq %[zero],(%[dst])\n"
-               "       addq   %[eight],%[dst]\n"
-               "       decl %%ecx ; jnz   0b\n"
-               "4:     movq  %[size1],%%rcx\n"
-               "       testl %%ecx,%%ecx\n"
-               "       jz     2f\n"
-               "1:     movb   %b[zero],(%[dst])\n"
-               "       incq   %[dst]\n"
-               "       decl %%ecx ; jnz  1b\n"
-               "2:\n"
-               ".section .fixup,\"ax\"\n"
-               "3:     lea 0(%[size1],%[size8],8),%[size8]\n"
-               "       jmp 2b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .align 8\n"
-               "       .quad 0b,3b\n"
-               "       .quad 1b,2b\n"
-               ".previous"
-               : [size8] "=c"(size), [dst] "=&D" (__d0)
-               : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
-                 [zero] "r" (0UL), [eight] "r" (8UL));
-       return size;
-}
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
-{
-       unsigned long __d0, __d1, __d2, __n = n;
-       __asm__ __volatile__(
-               "       cmpq  $15,%0\n"
-               "       jbe  1f\n"
-               "       mov  %1,%0\n"
-               "       neg  %0\n"
-               "       and  $7,%0\n"
-               "       sub  %0,%3\n"
-               "4:     rep; movsb\n" /* make 'to' address aligned */
-               "       mov  %3,%0\n"
-               "       shr  $3,%0\n"
-               "       and  $7,%3\n"
-               "       .align 2,0x90\n"
-               "0:     rep; movsq\n" /* as many quadwords as possible... */
-               "       mov  %3,%0\n"
-               "1:     rep; movsb\n" /* ...remainder copied as bytes */
-               "2:\n"
-               ".section .fixup,\"ax\"\n"
-               "5:     add %3,%0\n"
-               "       jmp 2b\n"
-               "3:     lea 0(%3,%0,8),%0\n"
-               "       jmp 2b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .align 8\n"
-               "       .quad 4b,5b\n"
-               "       .quad 0b,3b\n"
-               "       .quad 1b,2b\n"
-               ".previous"
-               : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-               : "3"(__n), "0"(__n), "1"(to), "2"(from)
-               : "memory");
-       return (unsigned)__n;
-}
-
-unsigned long
-__copy_from_user_ll(void *to, const void __user *from, unsigned n)
-{
-       unsigned long __d0, __d1, __d2, __n = n;
-       __asm__ __volatile__(
-               "       cmp  $15,%0\n"
-               "       jbe  1f\n"
-               "       mov  %1,%0\n"
-               "       neg  %0\n"
-               "       and  $7,%0\n"
-               "       sub  %0,%3\n"
-               "4:     rep; movsb\n" /* make 'to' address aligned */
-               "       mov  %3,%0\n"
-               "       shr  $3,%0\n"
-               "       and  $7,%3\n"
-               "       .align 2,0x90\n"
-               "0:     rep; movsq\n" /* as many quadwords as possible... */
-               "       mov  %3,%0\n"
-               "1:     rep; movsb\n" /* ...remainder copied as bytes */
-               "2:\n"
-               ".section .fixup,\"ax\"\n"
-               "5:     add %3,%0\n"
-               "       jmp 6f\n"
-               "3:     lea 0(%3,%0,8),%0\n"
-               "6:     push %0\n"
-               "       push %%rax\n"
-               "       xor  %%rax,%%rax\n"
-               "       rep; stosb\n"
-               "       pop  %%rax\n"
-               "       pop  %0\n"
-               "       jmp 2b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .align 8\n"
-               "       .quad 4b,5b\n"
-               "       .quad 0b,3b\n"
-               "       .quad 1b,6b\n"
-               ".previous"
-               : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-               : "3"(__n), "0"(__n), "1"(to), "2"(from)
-               : "memory");
-       return (unsigned)__n;
-}
-
-unsigned long clear_user(void *to, unsigned long n)
-{
-       if (access_ok(to, n))
-               return __clear_user(to, n);
-       return n;
-}
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long
-copy_to_user(void __user *to, const void *from, unsigned n)
-{
-       if (access_ok(to, n))
-               n = __copy_to_user(to, from, n);
-       return n;
-}
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long
-copy_from_user(void *to, const void __user *from, unsigned n)
-{
-       if (access_ok(from, n))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
-}
index 5dd6c6c1afe249d0610b8bfd0fb6a21ce50200f4..55a3e0e5c9e5d7b9e931f7b83496c7844b8acf3c 100644 (file)
@@ -43,6 +43,8 @@ typedef unsigned int u32;
 typedef signed long long s64;
 typedef unsigned long long u64;
 #define BITS_PER_LONG 32
+#define BYTES_PER_LONG 4
+#define LONG_BYTEORDER 2
 typedef unsigned int size_t;
 #if defined(CONFIG_X86_PAE)
 typedef u64 physaddr_t;
@@ -53,6 +55,8 @@ typedef u32 physaddr_t;
 typedef signed long s64;
 typedef unsigned long u64;
 #define BITS_PER_LONG 64
+#define BYTES_PER_LONG 8
+#define LONG_BYTEORDER 3
 typedef unsigned long size_t;
 typedef u64 physaddr_t;
 #endif
index 46c02ecef4910906b5fc723079dac2825a08945f..e5e32d0938de8a904f867624929bedf466fcf12b 100644 (file)
 #ifndef __X86_UACCESS_H__
 #define __X86_UACCESS_H__
 
+#include <xen/config.h>
+#include <xen/compiler.h>
+#include <xen/errno.h>
+#include <xen/prefetch.h>
+#include <asm/page.h>
+
+#define __user
+
 #ifdef __x86_64__
 #include <asm/x86_64/uaccess.h>
 #else
 #include <asm/x86_32/uaccess.h>
 #endif
 
+unsigned long copy_to_user(void *to, const void *from, unsigned len); 
+unsigned long copy_from_user(void *to, const void *from, unsigned len); 
+/* Handles exceptions in both to and from, but doesn't do access_ok */
+unsigned long __copy_to_user_ll(void *to, const void *from, unsigned n);
+unsigned long __copy_from_user_ll(void *to, const void *from, unsigned n);
+
+extern long __get_user_bad(void);
+extern void __put_user_bad(void);
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x,ptr)        \
+  __get_user_check((x),(ptr),sizeof(*(ptr)))
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x,ptr)                                                        \
+  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x,ptr) \
+  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x,ptr) \
+  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __put_user_nocheck(x,ptr,size)                         \
+({                                                             \
+       long __pu_err;                                          \
+       __put_user_size((x),(ptr),(size),__pu_err,-EFAULT);     \
+       __pu_err;                                               \
+})
+
+#define __put_user_check(x,ptr,size)                                   \
+({                                                                     \
+       long __pu_err = -EFAULT;                                        \
+       __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
+       if (__addr_ok(__pu_addr))                                       \
+               __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
+       __pu_err;                                                       \
+})                                                     
+
+#define __get_user_nocheck(x,ptr,size)                         \
+({                                                             \
+       long __gu_err, __gu_val;                                \
+       __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
+       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       __gu_err;                                               \
+})
+
+#define __get_user_check(x,ptr,size)                                   \
+({                                                                     \
+       long __gu_err, __gu_val;                                        \
+       __typeof__(*(ptr)) __user *__gu_addr = (ptr);                   \
+       __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);    \
+       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;                  \
+       __gu_err;                                                       \
+})                                                     
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+       __asm__ __volatile__(                                           \
+               "1:     mov"itype" %"rtype"1,%2\n"                      \
+               "2:\n"                                                  \
+               ".section .fixup,\"ax\"\n"                              \
+               "3:     mov %3,%0\n"                                    \
+               "       jmp 2b\n"                                       \
+               ".previous\n"                                           \
+               ".section __ex_table,\"a\"\n"                           \
+               "       "__FIXUP_ALIGN"\n"                              \
+               "       "__FIXUP_WORD" 1b,3b\n"                         \
+               ".previous"                                             \
+               : "=r"(err)                                             \
+               : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+       __asm__ __volatile__(                                           \
+               "1:     mov"itype" %2,%"rtype"1\n"                      \
+               "2:\n"                                                  \
+               ".section .fixup,\"ax\"\n"                              \
+               "3:     mov %3,%0\n"                                    \
+               "       xor"itype" %"rtype"1,%"rtype"1\n"               \
+               "       jmp 2b\n"                                       \
+               ".previous\n"                                           \
+               ".section __ex_table,\"a\"\n"                           \
+               "       "__FIXUP_ALIGN"\n"                              \
+               "       "__FIXUP_WORD" 1b,3b\n"                         \
+               ".previous"                                             \
+               : "=r"(err), ltype (x)                                  \
+               : "m"(__m(addr)), "i"(errret), "0"(err))
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from kernel space to user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+static always_inline unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+    if (__builtin_constant_p(n)) {
+        unsigned long ret;
+
+        switch (n) {
+        case 1:
+            __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
+            return ret;
+        case 2:
+            __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
+            return ret;
+        case 4:
+            __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
+            return ret;
+        case 8:
+            __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8);
+            return ret;
+        }
+    }
+    return __copy_to_user_ll(to, from, n);
+}
+
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from user space to kernel space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+static always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+    if (__builtin_constant_p(n)) {
+        unsigned long ret;
+
+        switch (n) {
+        case 1:
+            __get_user_size(*(u8 *)to, from, 1, ret, 1);
+            return ret;
+        case 2:
+            __get_user_size(*(u16 *)to, from, 2, ret, 2);
+            return ret;
+        case 4:
+            __get_user_size(*(u32 *)to, from, 4, ret, 4);
+            return ret;
+        case 8:
+            __get_user_size(*(u64*)to, from, 8, ret, 8);
+            return ret; 
+        }
+    }
+    return __copy_from_user_ll(to, from, n);
+}
+
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
index 772aa3f990ea48b7829756d2181932c71417b151..eb9b87ceb140c37bd320bb9ebb0e4d49c8e93094 100644 (file)
@@ -1,25 +1,6 @@
 #ifndef __i386_UACCESS_H
 #define __i386_UACCESS_H
 
-/*
- * User space memory access functions
- */
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/prefetch.h>
-#include <xen/string.h>
-
-#define __user
-
-/*
- * movsl can be slow when source and dest are not both 8-byte aligned
- */
-#ifdef CONFIG_X86_INTEL_USERCOPY
-extern struct movsl_mask {
-    int mask;
-} __cacheline_aligned movsl_mask;
-#endif
-
 #define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
 
 /*
@@ -41,112 +22,7 @@ extern struct movsl_mask {
 #define array_access_ok(addr,count,size) \
     (likely(count < (~0UL/size)) && access_ok(addr,count*size))
 
-extern long __get_user_bad(void);
-extern void __put_user_bad(void);
-
-/**
- * get_user: - Get a simple variable from user space.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user(x,ptr)        \
-  __get_user_check((x),(ptr),sizeof(*(ptr)))
-
-/**
- * put_user: - Write a simple value into user space.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user(x,ptr)                                                        \
-  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-
-/**
- * __get_user: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user(x,ptr) \
-  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-
-
-/**
- * __put_user: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user(x,ptr) \
-  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __put_user_nocheck(x,ptr,size)                         \
-({                                                             \
-       long __pu_err;                                          \
-       __put_user_size((x),(ptr),(size),__pu_err,-EFAULT);     \
-       __pu_err;                                               \
-})
-
-#define __put_user_check(x,ptr,size)                                   \
-({                                                                     \
-       long __pu_err = -EFAULT;                                        \
-       __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-       if (__addr_ok(__pu_addr))                                       \
-               __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
-       __pu_err;                                                       \
-})                                                     
-
-#define __put_user_u64(x, addr, err)                           \
+#define __put_user_u64(x, addr, retval, errret)                        \
        __asm__ __volatile__(                                   \
                "1:     movl %%eax,0(%2)\n"                     \
                "2:     movl %%edx,4(%2)\n"                     \
@@ -160,8 +36,8 @@ extern void __put_user_bad(void);
                "       .long 1b,4b\n"                          \
                "       .long 2b,4b\n"                          \
                ".previous"                                     \
-               : "=r"(err)                                     \
-               : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
+               : "=r"(retval)                                  \
+               : "A" (x), "r" (addr), "i"(errret), "0"(retval))
 
 #define __put_user_size(x,ptr,size,retval,errret)                      \
 do {                                                                   \
@@ -170,52 +46,29 @@ do {                                                                       \
        case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \
        case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
        case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \
-       case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
-         default: __put_user_bad();                                    \
+       case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval,errret);break;\
+       default: __put_user_bad();                                      \
        }                                                               \
 } while (0)
 
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
-       __asm__ __volatile__(                                           \
-               "1:     mov"itype" %"rtype"1,%2\n"                      \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "3:     movl %3,%0\n"                                   \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 4\n"                                     \
-               "       .long 1b,3b\n"                                  \
-               ".previous"                                             \
-               : "=r"(err)                                             \
-               : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
-
-
-#define __get_user_nocheck(x,ptr,size)                         \
-({                                                             \
-       long __gu_err, __gu_val;                                \
-       __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
-       __gu_err;                                               \
-})
-
-#define __get_user_check(x,ptr,size)                                   \
-({                                                                     \
-       long __gu_err, __gu_val;                                        \
-       __typeof__(*(ptr)) __user *__gu_addr = (ptr);                   \
-       __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);    \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
-       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;                  \
-       __gu_err;                                                       \
-})                                                     
+#define __get_user_u64(x, addr, retval, errret)                        \
+       __asm__ __volatile__(                                   \
+               "1:     movl 0(%2),%%eax\n"                     \
+               "2:     movl 4(%2),%%edx\n"                     \
+               "3:\n"                                          \
+               ".section .fixup,\"ax\"\n"                      \
+               "4:     movl %3,%0\n"                           \
+               "       xorl %%eax,%%eax\n"                     \
+               "       xorl %%edx,%%edx\n"                     \
+               "       jmp 3b\n"                               \
+               ".previous\n"                                   \
+               ".section __ex_table,\"a\"\n"                   \
+               "       .align 4\n"                             \
+               "       .long 1b,4b\n"                          \
+               "       .long 2b,4b\n"                          \
+               ".previous"                                     \
+               : "=r" (retval), "=A" (x)                       \
+               : "r" (addr), "i"(errret), "0"(retval))
 
 #define __get_user_size(x,ptr,size,retval,errret)                      \
 do {                                                                   \
@@ -224,115 +77,9 @@ do {                                                                       \
        case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \
        case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
        case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break;  \
+       case 8: __get_user_u64(x,ptr,retval,errret);break;              \
        default: (x) = __get_user_bad();                                \
        }                                                               \
 } while (0)
 
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
-       __asm__ __volatile__(                                           \
-               "1:     mov"itype" %2,%"rtype"1\n"                      \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "3:     movl %3,%0\n"                                   \
-               "       xor"itype" %"rtype"1,%"rtype"1\n"               \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 4\n"                                     \
-               "       .long 1b,3b\n"                                  \
-               ".previous"                                             \
-               : "=r"(err), ltype (x)                                  \
-               : "m"(__m(addr)), "i"(errret), "0"(err))
-
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
-unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
-
-/*
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
- */
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static always_inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-    if (__builtin_constant_p(n)) {
-        unsigned long ret;
-
-        switch (n) {
-        case 1:
-            __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
-            return ret;
-        case 2:
-            __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
-            return ret;
-        case 4:
-            __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
-            return ret;
-        }
-    }
-    return __copy_to_user_ll(to, from, n);
-}
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-    if (__builtin_constant_p(n)) {
-        unsigned long ret;
-
-        switch (n) {
-        case 1:
-            __get_user_size(*(u8 *)to, from, 1, ret, 1);
-            return ret;
-        case 2:
-            __get_user_size(*(u16 *)to, from, 2, ret, 2);
-            return ret;
-        case 4:
-            __get_user_size(*(u32 *)to, from, 4, ret, 4);
-            return ret;
-        }
-    }
-    return __copy_from_user_ll(to, from, n);
-}
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n);
-unsigned long copy_from_user(void *to,
-                             const void __user *from, unsigned long n);
-
-unsigned long clear_user(void __user *mem, unsigned long len);
-unsigned long __clear_user(void __user *mem, unsigned long len);
-
 #endif /* __i386_UACCESS_H */
index d655e01f83b077bceedb18f9a3124e8a30c35262..4d5f65c890525ae8e43fe84ca11bcbac5c709a46 100644 (file)
@@ -1,17 +1,6 @@
 #ifndef __X86_64_UACCESS_H
 #define __X86_64_UACCESS_H
 
-/*
- * User space memory access functions
- */
-#include <xen/config.h>
-#include <xen/compiler.h>
-#include <xen/errno.h>
-#include <xen/prefetch.h>
-#include <asm/page.h>
-
-#define __user
-
 /*
  * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
  * This is also valid for range checks (addr, addr+size). As long as the
 
 #define array_access_ok(addr, count, size) (__addr_ok(addr))
 
-extern long __get_user_bad(void);
-extern void __put_user_bad(void);
-
-/**
- * get_user: - Get a simple variable from user space.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user(x,ptr)        \
-  __get_user_check((x),(ptr),sizeof(*(ptr)))
-
-/**
- * put_user: - Write a simple value into user space.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user(x,ptr)                                                        \
-  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-
-/**
- * __get_user: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user(x,ptr) \
-  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-
-
-/**
- * __put_user: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only.  This function may sleep.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user(x,ptr) \
-  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __put_user_nocheck(x,ptr,size)                         \
-({                                                             \
-       long __pu_err;                                          \
-       __put_user_size((x),(ptr),(size),__pu_err,-EFAULT);     \
-       __pu_err;                                               \
-})
-
-#define __put_user_check(x,ptr,size)                                   \
-({                                                                     \
-       long __pu_err = -EFAULT;                                        \
-       __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-       if (__addr_ok(__pu_addr))                                       \
-               __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
-       __pu_err;                                                       \
-})                                                     
-
 #define __put_user_size(x,ptr,size,retval,errret)                      \
 do {                                                                   \
        retval = 0;                                                     \
@@ -143,47 +27,6 @@ do {                                                                        \
        }                                                               \
 } while (0)
 
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
-       __asm__ __volatile__(                                           \
-               "1:     mov"itype" %"rtype"1,%2\n"                      \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "3:     mov %3,%0\n"                                    \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 8\n"                                     \
-               "       .quad 1b,3b\n"                                  \
-               ".previous"                                             \
-               : "=r"(err)                                             \
-               : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
-
-#define __get_user_nocheck(x,ptr,size)                         \
-({                                                             \
-       long __gu_err, __gu_val;                                \
-       __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
-       __gu_err;                                               \
-})
-
-#define __get_user_check(x,ptr,size)                                   \
-({                                                                     \
-       long __gu_err, __gu_val;                                        \
-       __typeof__(*(ptr)) __user *__gu_addr = (ptr);                   \
-       __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);    \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
-       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;                  \
-       __gu_err;                                                       \
-})                                                     
-
 #define __get_user_size(x,ptr,size,retval,errret)                      \
 do {                                                                   \
        retval = 0;                                                     \
@@ -196,95 +39,4 @@ do {                                                                        \
        }                                                               \
 } while (0)
 
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
-       __asm__ __volatile__(                                           \
-               "1:     mov"itype" %2,%"rtype"1\n"                      \
-               "2:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "3:     mov %3,%0\n"                                    \
-               "       xor"itype" %"rtype"1,%"rtype"1\n"               \
-               "       jmp 2b\n"                                       \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .align 8\n"                                     \
-               "       .quad 1b,3b\n"                                  \
-               ".previous"                                             \
-               : "=r"(err), ltype (x)                                  \
-               : "m"(__m(addr)), "i"(errret), "0"(err))
-
-
-/*
- * Copy To/From Userspace
- */
-
-/* Handles exceptions in both to and from, but doesn't do access_ok */
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n);
-unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned n);
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned len); 
-unsigned long copy_from_user(void *to, const void __user *from, unsigned len); 
-
-static always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size) 
-{ 
-    int ret = 0;
-    if (!__builtin_constant_p(size))
-        return __copy_from_user_ll(dst,(void *)src,size);
-    switch (size) { 
-    case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); 
-        return ret;
-    case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
-        return ret;
-    case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
-        return ret;
-    case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
-        return ret; 
-    case 10:
-        __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
-        if (unlikely(ret)) return ret;
-        __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
-        return ret; 
-    case 16:
-        __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
-        if (unlikely(ret)) return ret;
-        __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
-        return ret; 
-    default:
-        return __copy_from_user_ll(dst,(void *)src,size); 
-    }
-}      
-
-static always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size) 
-{ 
-    int ret = 0;
-    if (!__builtin_constant_p(size))
-        return __copy_to_user_ll((void *)dst,src,size);
-    switch (size) { 
-    case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); 
-        return ret;
-    case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
-        return ret;
-    case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
-        return ret;
-    case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
-        return ret; 
-    case 10:
-        __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
-        if (unlikely(ret)) return ret;
-        asm("":::"memory");
-        __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
-        return ret; 
-    case 16:
-        __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
-        if (unlikely(ret)) return ret;
-        asm("":::"memory");
-        __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
-        return ret; 
-    default:
-        return __copy_to_user_ll((void *)dst,src,size); 
-    }
-}      
-
-unsigned long clear_user(void __user *mem, unsigned long len);
-unsigned long __clear_user(void __user *mem, unsigned long len);
-
 #endif /* __X86_64_UACCESS_H */